*/
static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
+ int ret;
+ struct cpupool *old;
+
if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
return -EBUSY;
+ /* Remember the previous pool binding so a failed switch can be rolled back. */
+ old = per_cpu(cpupool, cpu);
per_cpu(cpupool, cpu) = c;
- schedule_cpu_switch(cpu, c);
+ ret = schedule_cpu_switch(cpu, c);
+ if ( ret )
+ {
+ /* Switch failed: restore the old binding and propagate the error. */
+ per_cpu(cpupool, cpu) = old;
+ return ret;
+ }
+
cpu_clear(cpu, cpupool_free_cpus);
if (cpupool_moving_cpu == cpu)
{
cpu_set(cpu, cpupool_free_cpus);
if ( !ret )
{
- schedule_cpu_switch(cpu, NULL);
+ ret = schedule_cpu_switch(cpu, NULL);
+ if ( ret )
+ {
+ /* NOTE(review): switching the cpu back to the default scheduler
+  * failed, so it is not actually free — take it back out of the
+  * free set before unlocking. Confirm against the enclosing
+  * function (header not visible in this hunk). */
+ cpu_clear(cpu, cpupool_free_cpus);
+ goto out;
+ }
per_cpu(cpupool, cpu) = NULL;
cpupool_moving_cpu = -1;
cpupool_put(cpupool_cpu_moving);
cpupool_cpu_moving = NULL;
}
+
+out:
spin_unlock(&cpupool_lock);
return ret;
}
BUG();
}
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
{
unsigned long flags;
struct vcpu *idle;
struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
if ( old_ops == new_ops )
- return;
+ return 0;
idle = idle_vcpu[cpu];
+ /* Allocate the new scheduler's per-cpu and idle-vcpu data up front,
+  * before any state is torn down, so failure leaves the cpu untouched. */
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
+ if ( ppriv == NULL )
+ return -ENOMEM;
vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
+ if ( vpriv == NULL )
+ {
+ /* Second allocation failed: release the pdata acquired above. */
+ SCHED_OP(new_ops, free_pdata, ppriv, cpu);
+ return -ENOMEM;
+ }
pcpu_schedule_lock_irqsave(cpu, flags);
SCHED_OP(old_ops, free_vdata, vpriv_old);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
+
+ return 0;
}
struct scheduler *scheduler_get_default(void)
struct scheduler *scheduler_get_default(void);
struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
void scheduler_free(struct scheduler *sched);
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
void vcpu_force_reschedule(struct vcpu *v);
int cpu_disable_scheduler(unsigned int cpu);
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);